#define INTEL_SRAR_DATA_LOAD 0x134
#define INTEL_SRAR_INSTR_FETCH 0x150
-/* Thermal Hanlding */
#ifdef CONFIG_X86_MCE_THERMAL
-static void unexpected_thermal_interrupt(struct cpu_user_regs *regs)
-{
- printk(KERN_ERR "Thermal: CPU%d: Unexpected LVT TMR interrupt!\n",
- smp_processor_id());
- add_taint(TAINT_MACHINE_CHECK);
-}
-
-/* P4/Xeon Thermal transition interrupt handler */
static void intel_thermal_interrupt(struct cpu_user_regs *regs)
{
uint64_t msr_content;
unsigned int cpu = smp_processor_id();
static DEFINE_PER_CPU(s_time_t, next);
+ ack_APIC_irq();
+
if (NOW() < per_cpu(next, cpu))
return;
[...]
}
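
The reworked handler acks the local APIC up front and then rate-limits the (elided) status reporting through a per-CPU timestamp. Below is a minimal user-space sketch of that throttle pattern; the 5-second window is an assumption for illustration, since the hunk that sets the next deadline is not shown above:

    /* Illustrative analogue of the handler's throttle. The real code
     * keeps one deadline per CPU via DEFINE_PER_CPU; a single static
     * suffices to show the shape. */
    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static uint64_t next_report;   /* 0: first event always reported */

    static void thermal_event(void)
    {
        uint64_t now = now_ns();

        if (now < next_report)
            return;                        /* drop events inside the window */
        next_report = now + 5000000000ull; /* open the next 5s window */
        printf("thermal event logged\n");
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            thermal_event();               /* only the first call prints */
        return 0;
    }
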
-/* Thermal interrupt handler for this CPU setup */
-static void (*__read_mostly vendor_thermal_interrupt)(
- struct cpu_user_regs *regs) = unexpected_thermal_interrupt;
-
-void thermal_interrupt(struct cpu_user_regs *regs)
-{
- ack_APIC_irq();
- vendor_thermal_interrupt(regs);
-}
-
/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
[...]
uint32_t val;
int tm2 = 0;
unsigned int cpu = smp_processor_id();
+ static uint8_t thermal_apic_vector;
if (!intel_thermal_supported(c))
return; /* -ENODEV */
[...]
return; /* -EBUSY */
}
+ alloc_direct_apic_vector(&thermal_apic_vector, intel_thermal_interrupt);
+
/* The temperature transition interrupt handler setup */
- val = THERMAL_APIC_VECTOR; /* our delivery vector */
+ val = thermal_apic_vector; /* our delivery vector */
val |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */
apic_write_around(APIC_LVTTHMR, val);
rdmsrl(MSR_IA32_THERM_INTERRUPT, msr_content);
wrmsrl(MSR_IA32_THERM_INTERRUPT, msr_content | 0x03);
- /* ok we're good to go... */
- vendor_thermal_interrupt = intel_thermal_interrupt;
-
rdmsrl(MSR_IA32_MISC_ENABLE, msr_content);
wrmsrl(MSR_IA32_MISC_ENABLE, msr_content | (1ULL<<3));
[...]
clear_cmci();
}
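
The init path above no longer hardcodes THERMAL_APIC_VECTOR: a function-local static uint8_t starts out zero, and alloc_direct_apic_vector() fills it in and binds the handler on first use, so repeated CPU bring-up calls are harmless. Here is a standalone model of that allocate-once idiom, with a stand-in allocator that hands out vectors from the top of the high-priority pool; Xen's real allocator lives elsewhere in the tree and is not reproduced here:

    #include <stdio.h>
    #include <stdint.h>

    /* Pool bounds mirror FIRST/LAST_HIPRIORITY_VECTOR after this patch. */
    #define FIRST_HIPRI 0xf0
    #define LAST_HIPRI  0xf8

    typedef void (*handler_fn)(void);

    static handler_fn direct_handlers[256];
    static uint8_t next_free = LAST_HIPRI;  /* hand out from the top down */

    static void alloc_direct_vector(uint8_t *vector, handler_fn fn)
    {
        if (*vector)                 /* already allocated on an earlier call */
            return;
        if (next_free < FIRST_HIPRI) {
            fprintf(stderr, "out of high-priority vectors\n");
            return;
        }
        *vector = next_free--;
        direct_handlers[*vector] = fn;
    }

    static void thermal_handler(void) { puts("thermal"); }

    int main(void)
    {
        static uint8_t thermal_vector;

        alloc_direct_vector(&thermal_vector, thermal_handler);
        alloc_direct_vector(&thermal_vector, thermal_handler); /* no-op */
        printf("thermal vector = %#x\n", thermal_vector);      /* 0xf8 */
        direct_handlers[thermal_vector]();                     /* dispatch */
        return 0;
    }
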
+static void cmci_interrupt(struct cpu_user_regs *regs)
+{
+ mctelem_cookie_t mctc;
+ struct mca_summary bs;
+
+ ack_APIC_irq();
+
+ mctc = mcheck_mca_logout(
+ MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), &bs, NULL);
+
+ if (bs.errcnt && mctc != NULL) {
+ if (dom0_vmce_enabled()) {
+ mctelem_commit(mctc);
+ mce_printk(MCE_VERBOSE, "CMCI: send CMCI to DOM0 through virq\n");
+ send_global_virq(VIRQ_MCA);
+ } else {
+ x86_mcinfo_dump(mctelem_dataptr(mctc));
+ mctelem_dismiss(mctc);
+ }
+ } else if (mctc != NULL)
+ mctelem_dismiss(mctc);
+}
+
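
cmci_interrupt() itself is unchanged apart from becoming static; it moves above intel_init_cmci() so the symbol is in scope when passed to alloc_direct_apic_vector(). The telemetry cookie it gets from mcheck_mca_logout() must end up either committed (handed to dom0 via VIRQ_MCA) or dismissed, never dropped. A small sketch of that commit-or-dismiss ownership rule, with illustrative names rather than the mctelem API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct telem { int errcnt; };

    /* stand-in for mcheck_mca_logout(): always returns a fresh cookie */
    static struct telem *logout_banks(int errcnt)
    {
        struct telem *t = malloc(sizeof(*t));
        t->errcnt = errcnt;
        return t;
    }

    /* stand-in for the dom0 virq path: takes over the cookie */
    static void commit_to_consumer(struct telem *t)
    {
        printf("commit: consumer notified of %d error(s)\n", t->errcnt);
        free(t);
    }

    static void handle_cmci(bool consumer_present, int errcnt)
    {
        struct telem *t = logout_banks(errcnt);

        if (t->errcnt && consumer_present) {
            commit_to_consumer(t);      /* ownership transferred */
        } else {
            if (t->errcnt)
                printf("no consumer: dump %d error(s) locally\n", t->errcnt);
            free(t);                    /* dismiss in every other case */
        }
    }

    int main(void)
    {
        handle_cmci(true, 2);   /* committed */
        handle_cmci(false, 2);  /* dumped, then dismissed */
        handle_cmci(true, 0);   /* nothing logged: dismissed quietly */
        return 0;
    }
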
static void intel_init_cmci(struct cpuinfo_x86 *c)
{
u32 l, apic;
int cpu = smp_processor_id();
+ static uint8_t cmci_apic_vector;
if (!mce_available(c) || !cmci_support) {
if (opt_cpu_info)
[...]
return;
}
- apic = CMCI_APIC_VECTOR;
+ alloc_direct_apic_vector(&cmci_apic_vector, cmci_interrupt);
+
+ apic = cmci_apic_vector;
apic |= (APIC_DM_FIXED | APIC_LVT_MASKED);
apic_write_around(APIC_CMCI, apic);
mce_set_owner();
}
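
As on the thermal side, the LVT entry is programmed with the freshly allocated vector and left masked until bank ownership is settled. For reference, this is how such an LVT word packs together, using the conventional local-APIC bit positions; the 0xf8 vector is an arbitrary example value, and the macros are restated so the sketch compiles on its own:

    #include <stdio.h>
    #include <stdint.h>

    #define APIC_DM_FIXED   0x00000u   /* bits 8-10 zero: fixed delivery */
    #define APIC_LVT_MASKED 0x10000u   /* bit 16: interrupt masked */

    int main(void)
    {
        uint32_t vector = 0xf8;        /* example value from the pool */
        uint32_t lvt = vector | APIC_DM_FIXED | APIC_LVT_MASKED;

        printf("programmed (masked): %#x\n", lvt);   /* 0x100f8 */
        lvt &= ~APIC_LVT_MASKED;       /* unmask once setup completes */
        printf("unmasked:            %#x\n", lvt);   /* 0xf8 */
        return 0;
    }
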
-void cmci_interrupt(struct cpu_user_regs *regs)
-{
- mctelem_cookie_t mctc;
- struct mca_summary bs;
-
- ack_APIC_irq();
-
- mctc = mcheck_mca_logout(
- MCA_CMCI_HANDLER, __get_cpu_var(mce_banks_owned), &bs, NULL);
-
- if (bs.errcnt && mctc != NULL) {
- if (dom0_vmce_enabled()) {
- mctelem_commit(mctc);
- mce_printk(MCE_VERBOSE, "CMCI: send CMCI to DOM0 through virq\n");
- send_global_virq(VIRQ_MCA);
- } else {
- x86_mcinfo_dump(mctelem_dataptr(mctc));
- mctelem_dismiss(mctc);
- }
- } else if (mctc != NULL)
- mctelem_dismiss(mctc);
-}
-
/* MCA */
static int mce_is_broadcast(struct cpuinfo_x86 *c)
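
The remaining hunk is against the vector-definitions header. With the thermal and CMCI handlers now on dynamically allocated vectors, their fixed slots (0xfa and 0xf7) are retired, LOCAL_TIMER and PMU each move up one, and the high-priority dynamic pool grows by the two freed slots, from 0xf0-0xf6 to 0xf0-0xf8. A quick consistency check of the new layout, restating the post-patch values so it compiles on its own:

    #include <assert.h>

    #define LOCAL_TIMER_VECTOR      0xfa
    #define PMU_APIC_VECTOR         0xf9
    #define FIRST_HIPRIORITY_VECTOR 0xf0
    #define LAST_HIPRIORITY_VECTOR  0xf8

    int main(void)
    {
        /* pool must stay below the lowest fixed high-priority vector */
        assert(LAST_HIPRIORITY_VECTOR < PMU_APIC_VECTOR);
        /* old pool 0xf0-0xf6 held 7 vectors; the two freed slots make 9 */
        assert(LAST_HIPRIORITY_VECTOR - FIRST_HIPRIORITY_VECTOR + 1 == 9);
        return 0;
    }
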
#define INVALIDATE_TLB_VECTOR 0xfd
#define EVENT_CHECK_VECTOR 0xfc
#define CALL_FUNCTION_VECTOR 0xfb
-#define THERMAL_APIC_VECTOR 0xfa
-#define LOCAL_TIMER_VECTOR 0xf9
-#define PMU_APIC_VECTOR 0xf8
-#define CMCI_APIC_VECTOR 0xf7
+#define LOCAL_TIMER_VECTOR 0xfa
+#define PMU_APIC_VECTOR 0xf9
/*
* High-priority dynamically-allocated vectors. For interrupts that
* must be higher priority than any guest-bound interrupt.
*/
#define FIRST_HIPRIORITY_VECTOR 0xf0
-#define LAST_HIPRIORITY_VECTOR 0xf6
+#define LAST_HIPRIORITY_VECTOR 0xf8
/* Legacy PIC uses vectors 0xe0-0xef. */
#define FIRST_LEGACY_VECTOR 0xe0